static void write_msr_for(void *unused)
{
if (((1 << current->processor) & msr_cpu_mask))
- wrmsr(msr_addr, msr_lo, msr_hi);
+ (void)wrmsr_user(msr_addr, msr_lo, msr_hi); /* fault status deliberately discarded */
}
static void read_msr_for(void *unused)
{
if (((1 << current->processor) & msr_cpu_mask))
- rdmsr(msr_addr, msr_lo, msr_hi);
+ (void)rdmsr_user(msr_addr, msr_lo, msr_hi); /* fault status deliberately discarded */
}
long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op)
break;
case 0x09: /* WBINVD */
+ /* Ignore the instruction if unprivileged. */
if ( !IS_CAPABLE_PHYSDEV(ed->domain) )
- {
DPRINTK("Non-physdev domain attempted WBINVD.\n");
- goto fail;
- }
- wbinvd();
+ else
+ wbinvd(); /* only physdev-capable domains may flush caches */
break;
case 0x20: /* MOV CR?,<reg> */
break;
case 0x30: /* WRMSR */
+ /* Ignore the instruction if unprivileged. */
if ( !IS_PRIV(ed->domain) )
- {
- DPRINTK("Non-priv domain attempted WRMSR.\n");
+ DPRINTK("Non-priv domain attempted WRMSR(%p,%08lx,%08lx).\n",
+ regs->ecx, (long)regs->eax, (long)regs->edx);
+ else if ( wrmsr_user(regs->ecx, regs->eax, regs->edx) ) /* nonzero => MSR write faulted */
goto fail;
- }
- wrmsr(regs->ecx, regs->eax, regs->edx);
break;
case 0x32: /* RDMSR */
if ( !IS_PRIV(ed->domain) )
- {
- DPRINTK("Non-priv domain attempted RDMSR.\n");
+ DPRINTK("Non-priv domain attempted RDMSR(%p,%08lx,%08lx).\n",
+ regs->ecx, (long)regs->eax, (long)regs->edx);
+ /* Everyone can read the MSR space. */
+ if ( rdmsr_user(regs->ecx, regs->eax, regs->edx) ) /* nonzero => MSR read faulted */
goto fail;
- }
- rdmsr(regs->ecx, regs->eax, regs->edx);
break;
default:
#define __GUEST_SS 0x082b
/* For generic assembly code: use macros to define operation/operand sizes. */
-#define __OS "q" /* Operation Suffix */
-#define __OP "r" /* Operand Prefix */
+#define __OS "q" /* Operation Suffix */
+#define __OP "r" /* Operand Prefix */
+#define __FIXUP_ALIGN ".align 8" /* __ex_table entries are pointer-aligned (8 bytes) */
+#define __FIXUP_WORD ".quad" /* 8-byte table entry: fault address / fixup address */
#elif defined(__i386__)
#define __HYPERVISOR_DS 0x0810
/* For generic assembly code: use macros to define operation/operand sizes. */
-#define __OS "l" /* Operation Suffix */
-#define __OP "e" /* Operand Prefix */
+#define __OS "l" /* Operation Suffix */
+#define __OP "e" /* Operand Prefix */
+#define __FIXUP_ALIGN ".align 4" /* __ex_table entries are pointer-aligned (4 bytes) */
+#define __FIXUP_WORD ".long" /* 4-byte table entry: fault address / fixup address */
#endif /* __i386__ */
#ifndef __ASM_MSR_H
#define __ASM_MSR_H
-/*
- * Access to machine-specific registers (available on 586 and better only)
- * Note: the rd* operations modify the parameters directly (without using
- * pointer indirection), this allows gcc to optimize better
- */
-
#define rdmsr(msr,val1,val2) \
__asm__ __volatile__("rdmsr" \
: "=a" (val1), "=d" (val2) \
: "c" (msr))
-#define rdmsrl(msr,val) do { unsigned long a__,b__; \
- __asm__ __volatile__("rdmsr" \
- : "=a" (a__), "=d" (b__) \
- : "c" (msr)); \
- val = a__ | (b__<<32); \
-} while(0);
-
#define wrmsr(msr,val1,val2) \
__asm__ __volatile__("wrmsr" \
: /* no outputs */ \
: "c" (msr), "a" (val1), "d" (val2))
+/*
+ * rdmsr_user(): as rdmsr(), but a faulting RDMSR is recovered via the
+ * __ex_table fixup path.  Evaluates to 0 on success, 1 on fault (val1/val2
+ * are then left unmodified).  NB. _rc must be an in/out ("+r") operand:
+ * with output-only "=r" the compiler may discard the '= 0' initialiser,
+ * leaving _rc as register garbage on the non-faulting path.
+ */
+#define rdmsr_user(msr,val1,val2) ({\
+ int _rc = 0; \
+ __asm__ __volatile__( \
+ "1: rdmsr\n2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: movl $1,%2\n; jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " "__FIXUP_ALIGN"\n" \
+ " "__FIXUP_WORD" 1b,3b\n" \
+ ".previous\n" \
+ : "=a" (val1), "=d" (val2), "+r" (_rc) \
+ : "c" (msr)); \
+ _rc; })
+
+/*
+ * wrmsr_user(): as wrmsr(), but a faulting WRMSR is recovered via the
+ * __ex_table fixup path.  Evaluates to 0 on success, 1 on fault.
+ * NB. _rc must be an in/out ("+r") operand -- see rdmsr_user().
+ */
+#define wrmsr_user(msr,val1,val2) ({\
+ int _rc = 0; \
+ __asm__ __volatile__( \
+ "1: wrmsr\n2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: movl $1,%0\n; jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " "__FIXUP_ALIGN"\n" \
+ " "__FIXUP_WORD" 1b,3b\n" \
+ ".previous\n" \
+ : "+r" (_rc) \
+ : "c" (msr), "a" (val1), "d" (val2)); \
+ _rc; })
+
#define rdtsc(low,high) \
__asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))